MLE Fit

# Sender ("bullshitter") trials only, tagged with a combined
# probabilityRed-by-experiment condition label (e.g. "0.2_expt4").
humanLie <- bs.final %>%
  filter(roleCurrent == "bullshitter") %>%
  mutate(condition = paste(probabilityRed, expt, sep = "_"))

# Inverse-logit (logistic) transform: map log-odds to a probability in (0, 1).
# Uses the numerically stable form 1 / (1 + exp(-x)); the original
# exp(x) / (1 + exp(x)) overflows to Inf/Inf = NaN for x greater than ~710.
# This matters because p.kstar.k() passes the optimizer's alph through
# unclamped (only lieLogisticModel clamps its log-odds).
logitToProb <- function(logit){
  1 / (1 + exp(-logit))
}

# Logit transform: map a probability in (0, 1) to log-odds.
probToLogit <- function(prob) {
  odds <- prob / (1 - prob)
  log(odds)
}

# w = b + a
# u = a / (b + a)


# Logistic model of P(lie | k): slope beta, midpoint mu.
# Log-odds are clamped to [-10, 10] before the inverse-logit to keep the
# likelihood away from exact 0/1 during optimization.
lieLogisticModel <- function(k, beta, mu) {
  raw_logodds <- beta * (k - mu)
  clamped <- pmax(pmin(raw_logodds, 10), -10)
  logitToProb(clamped)
}

# Alias for lieLogisticModel: probability of lying given k red draws.
p.lie.k <- function(k, beta, mu) lieLogisticModel(k, beta, mu)


# P(report kstar | drew k, lied): Binomial(10, inv-logit(alph)) over kstar,
# renormalized to exclude the truthful report kstar == k.
# Note: alph arrives on the logit scale and is transformed here.
p.kstar.k <- function(k, kstar, alph) {
  rate <- logitToProb(alph)
  numer <- dbinom(kstar, 10, rate)
  denom <- 1 - dbinom(k, 10, rate)
  numer / denom
}

# Full likelihood of a report: P(truth) when kstar == k, otherwise
# P(lie) times the renormalized binomial probability of the reported kstar.
lieLogisticBinom <- function(k, kstar, beta, mu, alph) {
  told_truth <- k == kstar
  lie_prob <- p.lie.k(k, beta, mu)
  ifelse(told_truth, 1 - lie_prob, lie_prob * p.kstar.k(k, kstar, alph))
}


#lieLogisticBinom(5,6,2,1,5,2,5,0)

# Negative log-likelihood of the lie model across all humanLie trials.
# One shared slope magnitude `beta`; a (mu, alph) pair per condition cell,
# where the suffix encodes the cell: mu0.2_4 = mu for probabilityRed 0.2
# in expt4, etc. Reads the global data frame `humanLie`.
nLL <- function(beta, mu0.2_4, alph0.2_4,
                      mu0.5_4, alph0.5_4,
                      mu0.8_4, alph0.8_4,
                      mu0.2_5, alph0.2_5,
                      mu0.5_5, alph0.5_5,
                      mu0.8_5, alph0.8_5){
  # observed draws/reports and condition labels, one entry per trial
  k = humanLie$drawnRed
  kstar = humanLie$reportedDrawn
  expt = humanLie$expt
  prob = humanLie$probabilityRed
  # map each trial to its condition-specific logistic midpoint ...
  mu = case_when(
    prob == 0.2 & expt == "expt4" ~ mu0.2_4,
    prob == 0.5 & expt == "expt4" ~ mu0.5_4,
    prob == 0.8 & expt == "expt4" ~ mu0.8_4,
    prob == 0.2 & expt == "expt5" ~ mu0.2_5,
    prob == 0.5 & expt == "expt5" ~ mu0.5_5,
    prob == 0.8 & expt == "expt5" ~ mu0.8_5
   )
  # ... and its condition-specific binomial-rate logit
  alph = case_when(
    prob == 0.2 & expt == "expt4" ~ alph0.2_4,
    prob == 0.5 & expt == "expt4" ~ alph0.5_4,
    prob == 0.8 & expt == "expt4" ~ alph0.8_4,
    prob == 0.2 & expt == "expt5" ~ alph0.2_5,
    prob == 0.5 & expt == "expt5" ~ alph0.5_5,
    prob == 0.8 & expt == "expt5" ~ alph0.8_5
   )
  
  # expt5 gets the mirrored slope: a single |beta| is shared, sign flipped
  betas = ifelse(expt=="expt4", 1*beta, -1*beta)
  
  pred = lieLogisticBinom(k, kstar, betas, mu, alph)
  # likelihood of observed kstar for that k, given parameters
  neg.log.lik = -1*sum(log(pred))
  # mus/alphas are only consumed by the (currently disabled) prior below
  mus = c(mu0.2_4, mu0.5_4, mu0.8_4, mu0.2_5, mu0.5_5, mu0.8_5)
  alphas = c(alph0.2_4, alph0.5_4, alph0.8_4, alph0.2_5, alph0.5_5, alph0.8_5)
  #neg.log.prior = sum(.0001*(mus-5)^2)-abs(beta)+ sum(alphas^2)+u*10
  neg.log.prior = 0
  neg.log.lik+neg.log.prior
}

# as bernoulli instead of if statement


# Fit all 13 parameters jointly by maximum likelihood (stats4::mle, BFGS).
# Starting values are drawn at random, so the optimum reached can vary from
# run to run — NOTE(review): consider set.seed() or multiple restarts.
fit <- summary(mle(nLL,
           start=list(beta=rnorm(1, 0, 0.5),
                      mu0.2_4=rnorm(1, 5, 3),
                      alph0.2_4=rnorm(1, 0, 0.5),
                      mu0.5_4=rnorm(1, 5, 3),
                      alph0.5_4=rnorm(1, 0, 0.5),
                      mu0.8_4=rnorm(1, 5, 3),
                      alph0.8_4=rnorm(1, 0, 0.5),
                      mu0.2_5=rnorm(1, 5, 3),
                      alph0.2_5=rnorm(1, 0, 0.5),
                      mu0.5_5=rnorm(1, 5, 3),
                      alph0.5_5=rnorm(1, 0, 0.5),
                      mu0.8_5=rnorm(1, 5, 3),
                      alph0.8_5=rnorm(1, 0, 0.5)),
           method = "BFGS"))
# print the coefficient table (estimates + standard errors)
fit
## Maximum likelihood estimation
## 
## Call:
## mle(minuslogl = nLL, start = list(beta = rnorm(1, 0, 0.5), mu0.2_4 = rnorm(1, 
##     5, 3), alph0.2_4 = rnorm(1, 0, 0.5), mu0.5_4 = rnorm(1, 5, 
##     3), alph0.5_4 = rnorm(1, 0, 0.5), mu0.8_4 = rnorm(1, 5, 3), 
##     alph0.8_4 = rnorm(1, 0, 0.5), mu0.2_5 = rnorm(1, 5, 3), alph0.2_5 = rnorm(1, 
##         0, 0.5), mu0.5_5 = rnorm(1, 5, 3), alph0.5_5 = rnorm(1, 
##         0, 0.5), mu0.8_5 = rnorm(1, 5, 3), alph0.8_5 = rnorm(1, 
##         0, 0.5)), method = "BFGS")
## 
## Coefficients:
##              Estimate Std. Error
## beta      -0.16862156 0.01073248
## mu0.2_4    1.07071628 0.33040092
## alph0.2_4 -0.60545517 0.02646479
## mu0.5_4   -0.55319475 0.46814335
## alph0.5_4  0.11374682 0.02839971
## mu0.8_4    0.24646654 0.55531204
## alph0.8_4  0.75871520 0.03384144
## mu0.2_5    6.44649855 0.38907535
## alph0.2_5 -0.77714668 0.02794583
## mu0.5_5    9.13711877 0.44651351
## alph0.5_5  0.05458827 0.03033608
## mu0.8_5    9.89331509 0.34045217
## alph0.8_5  0.41982069 0.02600061
## 
## -2 log L: 24509.8

P(k* | k)

bin2d graph

# Placeholder frame guaranteeing all 11 reportedDrawn levels (0:10) exist,
# so complete() below can expand the full grid; scalar columns recycle to
# 11 rows.
template <- data.frame(expt = NA,
                       probabilityRed = 0,
                       drawnRed = 0,
                       reportedDrawn = 0:10,
                       n = 0)

# Observed report counts per (experiment, condition, drawn k, reported k*)
# cell actually present in the data; sender trials only. Result stays
# grouped, which downstream code relies on.
complete4.5 <- bs.final %>%
  filter(roleCurrent == "bullshitter") %>%
  group_by(expt, probabilityRed, drawnRed, reportedDrawn) %>%
  summarise(n=n()) 

# Pad the observed counts to the full factorial grid: complete() fills
# unseen combinations with n = 0 (template supplies the 0:10 reportedDrawn
# levels), the filter drops the template's placeholder rows, and counts are
# then converted to within-(expt, condition, k) proportions.
lieCtFull <- bind_rows(template,complete4.5) %>%
  complete(expt, probabilityRed, drawnRed, reportedDrawn, fill=list(n=0)) %>%
  filter(!is.na(expt), probabilityRed != 0) %>%
  group_by(expt, probabilityRed, drawnRed) %>%
  mutate(total = sum(n),
         proportion = n / total)

# Heatmap of reported vs. actual draws; empty cells get a -0.1 sentinel so
# they remain distinguishable from log(1) = 0 cells.
lieCtFull %>%
  mutate(logcount = ifelse(n == 0, -0.1, log(n))) %>%
  ggplot() +
  geom_bin2d(aes(x = drawnRed, y = reportedDrawn, fill = logcount),
             stat = "identity") +
  ggtitle("k* given k - results") +
  facet_grid(probabilityRed ~ expt)

# Model predictions on the full 11x11 (k, kstar) grid for each of the
# 3 probability conditions x 2 experiments. The rep() blocks are
# position-dependent: rows are ordered expt4 then expt5, and within each
# experiment p = 0.2, 0.5, 0.8 — each condition contributing 11*11 rows
# carrying that cell's fitted mu/alph (and SEs) from `fit`.
lieMLE.pred <- data.frame(k=rep(rep(0:10,each=11),3*2), 
                          kstar=rep(0:10, 11*3*2),
                          p=as.factor(rep(c(rep(0.2,11*11), rep(0.5,11*11), rep(0.8,11*11)),2)),
                          expt=as.factor(c(rep("expt4",11*11*3), rep("expt5",11*11*3))),
                          betas=c(rep(fit@coef["beta","Estimate"],11*11*3*2)),
                          mu=c(rep(fit@coef["mu0.2_4","Estimate"],11*11),
                               rep(fit@coef["mu0.5_4","Estimate"],11*11),
                               rep(fit@coef["mu0.8_4","Estimate"],11*11),
                               rep(fit@coef["mu0.2_5","Estimate"],11*11),
                               rep(fit@coef["mu0.5_5","Estimate"],11*11),
                               rep(fit@coef["mu0.8_5","Estimate"],11*11)),
                          mu.se=c(rep(fit@coef["mu0.2_4","Std. Error"],11*11),
                                  rep(fit@coef["mu0.5_4","Std. Error"],11*11),
                                  rep(fit@coef["mu0.8_4","Std. Error"],11*11),
                                  rep(fit@coef["mu0.2_5","Std. Error"],11*11),
                                  rep(fit@coef["mu0.5_5","Std. Error"],11*11),
                                  rep(fit@coef["mu0.8_5","Std. Error"],11*11)),
                          alph=c(rep(fit@coef["alph0.2_4","Estimate"],11*11),
                                 rep(fit@coef["alph0.5_4","Estimate"],11*11),
                                 rep(fit@coef["alph0.8_4","Estimate"],11*11),
                                 rep(fit@coef["alph0.2_5","Estimate"],11*11),
                                 rep(fit@coef["alph0.5_5","Estimate"],11*11),
                                 rep(fit@coef["alph0.8_5","Estimate"],11*11)),
                          alph.se=c(rep(fit@coef["alph0.2_4","Std. Error"],11*11),
                                    rep(fit@coef["alph0.5_4","Std. Error"],11*11),
                                    rep(fit@coef["alph0.8_4","Std. Error"],11*11),
                                    rep(fit@coef["alph0.2_5","Std. Error"],11*11),
                                    rep(fit@coef["alph0.5_5","Std. Error"],11*11),
                                    rep(fit@coef["alph0.8_5","Std. Error"],11*11))) %>%
  # flip beta for expt5 (mirrored scale), then evaluate the model per row
  mutate(betas = ifelse(expt=="expt5", -betas, betas),
         p.lie.k = p.lie.k(k, betas, mu),
         p.kstar.k=lieLogisticBinom(k, kstar, betas, mu, alph),
         logp.kstar.k = log(p.kstar.k))
# Model-predicted log P(k* | k) as a tile heatmap, one panel per condition.
ggplot(lieMLE.pred) +
  geom_tile(aes(x = k, y = kstar, fill = logp.kstar.k), stat = "identity") +
  ggtitle("k* given k - MLE") +
  facet_grid(p ~ expt)

# Same surface as the tile plot, rendered as contours.
ggplot(lieMLE.pred) +
  geom_contour(aes(x = k, y = kstar, z = logp.kstar.k)) +
  ggtitle("k* given k - MLE") +
  facet_grid(p ~ expt)

3D graph

# Observed log-counts as a matrix (rows = reportedDrawn 0:10 in order,
# columns = drawnRed 0:10 via spread) for the expt4 / p = 0.2 cell,
# shaped for plotly's surface plot. Zero counts use the -0.1 sentinel.
matrDF <- lieCtFull %>%
  filter(expt == "expt4", probabilityRed == 0.2) %>%
  mutate(logcount = ifelse(n==0, -0.1, log(n))) %>%
  ungroup() %>%
  select(drawnRed, reportedDrawn, logcount) %>%
  spread(drawnRed, logcount) %>%
  select(-reportedDrawn) %>%
  as.matrix()

# 3-D surface of the observed log-counts.
p <- add_surface(plot_ly(z = ~matrDF))
p
# Model-predicted log P(k* | k) for the same expt4 / p = 0.2 cell,
# reshaped (rows = kstar, columns = k) for the companion plotly surface.
matrDF.MLE <- lieMLE.pred %>%
  filter(expt == "expt4", p == 0.2) %>%
  select(k, kstar, logp.kstar.k) %>%
  spread(k, logp.kstar.k) %>%
  select(-kstar) %>%
  as.matrix()

# 3-D surface of the model's predicted log-probabilities.
p.MLE <- add_surface(plot_ly(z = ~matrDF.MLE))
p.MLE

Mean k* | k

# Model-implied E[k* | k] per condition: expectation over the predicted
# report distribution.
lieMLE.pred %>%
  group_by(p, expt, k) %>%
  summarise(mean.kstar = sum(kstar * p.kstar.k)) %>%
  ggplot() +
  geom_line(aes(x = k, y = mean.kstar, colour = p)) +
  facet_wrap(~expt)

# Observed conditional report distribution P(k* | k) per condition cell,
# with a truth count per cell; the second group_by makes p.kstar.k
# normalize within each drawn k. Result remains grouped.
exptLie <- humanLie %>%
  mutate(probabilityRed.txt = paste("p =", probabilityRed)) %>%
  group_by(probabilityRed.txt, expt, drawnRed, reportedDrawn) %>%
  summarise(n = n(),
            truth = sum(drawnRed == reportedDrawn)) %>%
  ungroup() %>%
  group_by(probabilityRed.txt, expt, drawnRed) %>%
  mutate(p.kstar.k = n/sum(n),
         fit="experiment")
# Model predictions renamed/reshaped to exptLie's column layout so the two
# can be row-bound for comparison plots.
lieMLE.2 <- with(lieMLE.pred,
                 data.frame(probabilityRed.txt = paste("p =", p),
                            expt = expt,
                            drawnRed = k,
                            reportedDrawn = kstar,
                            p.kstar.k = p.kstar.k,
                            n = NA,
                            fit = "model"))
# Stack observed and model rows; the factor-vs-character warnings below are
# expected (factor columns are coerced to character on bind).
lie.full <- bind_rows(exptLie, lieMLE.2)
## Warning in bind_rows_(x, .id): binding character and factor vector,
## coercing into character vector

## Warning in bind_rows_(x, .id): binding character and factor vector,
## coercing into character vector
# Observed vs. model E[k* | k] per condition; the gray diagonal marks
# perfectly honest reporting (k* = k).
lie.full %>%
  group_by(fit, probabilityRed.txt, expt, drawnRed) %>%
  summarise(mean.reported = sum(reportedDrawn * p.kstar.k)) %>%
  ggplot() +
  geom_line(aes(x = drawnRed, y = mean.reported,
                colour = probabilityRed.txt, linetype = fit)) +
  geom_abline(intercept = 0, slope = 1, colour = "darkgray", linetype = 4) +
  facet_wrap(~expt)

P(lie | k)

# One model P(lie | k) row per condition cell; unique() collapses the 11
# duplicate kstar rows per k.
lieMLE.3 <- with(lieMLE.pred,
                 data.frame(probabilityRed.txt = paste("p =", p),
                            expt = expt,
                            drawnRed = k,
                            p.lie.k = p.lie.k,
                            se = NA,
                            n = NA,
                            fit = "model")) %>%
  unique()
# Observed lie rate per (condition, experiment, drawn k), with a binomial
# standard error sqrt(p(1-p)/n) for the error bars below.
exptLie.3 <- humanLie %>%
  mutate(probabilityRed.txt = paste("p =", probabilityRed)) %>%
  group_by(probabilityRed.txt, expt, drawnRed) %>%
  summarise(p.lie.k = sum(drawnRed != reportedDrawn)/n(),
            se = sqrt(p.lie.k*(1-p.lie.k)/n()),
            n = n()) %>%
  mutate(fit="experiment")

# Stack model and observed lie-rate rows (coercion warnings below expected).
lie.3.full <- bind_rows(lieMLE.3, exptLie.3)
## Warning in bind_rows_(x, .id): binding factor and character vector,
## coercing into character vector
## Warning in bind_rows_(x, .id): binding character and factor vector,
## coercing into character vector
## Warning in bind_rows_(x, .id): binding factor and character vector,
## coercing into character vector
## Warning in bind_rows_(x, .id): binding character and factor vector,
## coercing into character vector
# Observed vs. fitted P(lie | k): model curves as lines, experimental cells
# as points with +/- 1 SE error bars.
# Fix: geom_errorbar requires the ymin/ymax aesthetics — aes(min=, max=)
# is not a valid mapping in current ggplot2 and the bars would not render.
ggplot(data=lie.3.full, aes(x=drawnRed, y=p.lie.k, colour=probabilityRed.txt, linetype=fit)) +
  geom_point(data=filter(lie.3.full, fit=="experiment")) +
  geom_line() +
  geom_errorbar(data=filter(lie.3.full, fit=="experiment"),
                aes(x=drawnRed, colour=probabilityRed.txt,
                    ymin=p.lie.k-se, ymax=p.lie.k+se),
                width=.3) +
  scale_x_continuous("Actual Marbles Drawn", limits=c(-0.2,10.2)) +
  scale_y_continuous("Proportion Lie", limits=c(0,1)) +
  guides(colour=guide_legend(title="")) +
  facet_wrap(~expt) +
  theme_minimal()

# One row of fitted (mu, alph) and their SEs per condition.
# Note: select() after group_by() silently re-adds the grouping columns
# `p` and `expt` — hence the "Adding missing grouping variables" message.
MLEparams <- lieMLE.pred %>%
  group_by(p, expt) %>%
  select(mu, mu.se, alph, alph.se) %>%
  unique()
## Adding missing grouping variables: `p`, `expt`
MLEparams
## # A tibble: 6 x 6
## # Groups:   p, expt [6]
##   p     expt      mu mu.se    alph alph.se
##   <fct> <fct>  <dbl> <dbl>   <dbl>   <dbl>
## 1 0.2   expt4  1.07  0.330 -0.605   0.0265
## 2 0.5   expt4 -0.553 0.468  0.114   0.0284
## 3 0.8   expt4  0.246 0.555  0.759   0.0338
## 4 0.2   expt5  6.45  0.389 -0.777   0.0279
## 5 0.5   expt5  9.14  0.447  0.0546  0.0303
## 6 0.8   expt5  9.89  0.340  0.420   0.0260
# one-sided two-sample Wald z-test (unpooled variances, normal approximation)
# One-sided two-sample Wald z-test with unpooled variances.
# m1, m2: parameter estimates; sd1, sd2: their standard errors.
# z = (m1 - m2) / sqrt(sd1^2 + sd2^2); p = P(Z > |z|) (one-sided).
# Returns a one-row data.frame echoing the inputs plus z, p, and a
# significance flag at alpha = .05.
# Fixes: spell out FALSE (never the reassignable F); drop the unneeded
# explicit return().
wald.z.test <- function(m1, sd1, m2, sd2){
  z <- (m1 - m2) / sqrt(sd1^2 + sd2^2)
  # upper tail of the standard normal at |z|
  p <- pnorm(abs(z), lower.tail = FALSE)
  signif <- p < .05
  data.frame(m1, sd1, m2, sd2, z, p, signif)
}

# Pairwise Wald z-tests between fitted parameters.
# MLEparams row index -> condition: 1 = (p=0.2, expt4), 2 = (0.5, expt4),
# 3 = (0.8, expt4), 4 = (0.2, expt5), 5 = (0.5, expt5), 6 = (0.8, expt5).

# Comparing mus (logistic midpoints)

## expt 4: p=0.5 vs p=0.2
wald.z.test(MLEparams$mu[2], MLEparams$mu.se[2], MLEparams$mu[1], MLEparams$mu.se[1])
##           m1       sd1       m2       sd2         z           p signif
## 1 -0.5531948 0.4681433 1.070716 0.3304009 -2.834077 0.002297915   TRUE
## expt 4: p=0.5 vs p=0.8
wald.z.test(MLEparams$mu[2], MLEparams$mu.se[2], MLEparams$mu[3], MLEparams$mu.se[3])
##           m1       sd1        m2      sd2         z         p signif
## 1 -0.5531948 0.4681433 0.2464665 0.555312 -1.100988 0.1354509  FALSE
## expt 5: p=0.5 vs p=0.2
wald.z.test(MLEparams$mu[5], MLEparams$mu.se[5], MLEparams$mu[4], MLEparams$mu.se[4])
##         m1       sd1       m2       sd2        z            p signif
## 1 9.137119 0.4465135 6.446499 0.3890753 4.543088 2.771809e-06   TRUE
## expt 5: p=0.5 vs p=0.8
wald.z.test(MLEparams$mu[5], MLEparams$mu.se[5], MLEparams$mu[6], MLEparams$mu.se[6])
##         m1       sd1       m2       sd2         z          p signif
## 1 9.137119 0.4465135 9.893315 0.3404522 -1.346744 0.08903139  FALSE
## p=0.2: expt4 vs expt5
wald.z.test(MLEparams$mu[1], MLEparams$mu.se[1], MLEparams$mu[4], MLEparams$mu.se[4])
##         m1       sd1       m2       sd2         z            p signif
## 1 1.070716 0.3304009 6.446499 0.3890753 -10.53176 3.083643e-26   TRUE
## p=0.5: expt4 vs expt5
wald.z.test(MLEparams$mu[2], MLEparams$mu.se[2], MLEparams$mu[5], MLEparams$mu.se[5])
##           m1       sd1       m2       sd2         z            p signif
## 1 -0.5531948 0.4681433 9.137119 0.4465135 -14.97867 5.061185e-51   TRUE
## p=0.8: expt4 vs expt5
wald.z.test(MLEparams$mu[3], MLEparams$mu.se[3], MLEparams$mu[6], MLEparams$mu.se[6])
##          m1      sd1       m2       sd2         z            p signif
## 1 0.2464665 0.555312 9.893315 0.3404522 -14.81016 6.297425e-50   TRUE

# Comparing alphas (logits of the binomial report rate)

## expt 4: p=0.5 vs p=0.2
wald.z.test(MLEparams$alph[2], MLEparams$alph.se[2], MLEparams$alph[1], MLEparams$alph.se[1])
##          m1        sd1         m2        sd2        z            p signif
## 1 0.1137468 0.02839971 -0.6054552 0.02646479 18.52698 6.256077e-77   TRUE
## expt 4: p=0.5 vs p=0.8
wald.z.test(MLEparams$alph[2], MLEparams$alph.se[2], MLEparams$alph[3], MLEparams$alph.se[3])
##          m1        sd1        m2        sd2         z            p signif
## 1 0.1137468 0.02839971 0.7587152 0.03384144 -14.59897 1.425556e-48   TRUE
## expt 5: p=0.5 vs p=0.2
wald.z.test(MLEparams$alph[5], MLEparams$alph.se[5], MLEparams$alph[4], MLEparams$alph.se[4])
##           m1        sd1         m2        sd2        z            p signif
## 1 0.05458827 0.03033608 -0.7771467 0.02794583 20.16514 9.909331e-91   TRUE
## expt 5: p=0.5 vs p=0.8
wald.z.test(MLEparams$alph[5], MLEparams$alph.se[5], MLEparams$alph[6], MLEparams$alph.se[6])
##           m1        sd1        m2        sd2         z            p signif
## 1 0.05458827 0.03033608 0.4198207 0.02600061 -9.141359 3.083611e-20   TRUE
## p=0.2: expt4 vs expt5
wald.z.test(MLEparams$alph[1], MLEparams$alph.se[1], MLEparams$alph[4], MLEparams$alph.se[4])
##           m1        sd1         m2        sd2        z            p signif
## 1 -0.6054552 0.02646479 -0.7771467 0.02794583 4.460867 4.081433e-06   TRUE
## p=0.5: expt4 vs expt5
wald.z.test(MLEparams$alph[2], MLEparams$alph.se[2], MLEparams$alph[5], MLEparams$alph.se[5])
##          m1        sd1         m2        sd2        z          p signif
## 1 0.1137468 0.02839971 0.05458827 0.03033608 1.423619 0.07727836  FALSE
## p=0.8: expt4 vs expt5
wald.z.test(MLEparams$alph[3], MLEparams$alph.se[3], MLEparams$alph[6], MLEparams$alph.se[6])
##          m1        sd1        m2        sd2       z           p signif
## 1 0.7587152 0.03384144 0.4198207 0.02600061 7.94103 1.00255e-15   TRUE

Logistic function P(lie | k) extended for k = -20:35 (so that you can see the logistic curve and not just a line)

# Plot the fitted logistic P(lie | k) curves over an extended range
# (k = -20:35) so the sigmoid shape is visible outside the 0:10 data range.
# Row order of the rep() blocks matches the mu vector: expt4 then expt5,
# with p = 0.2, 0.5, 0.8 within each. Dashed verticals mark the real
# k range [0, 10].
x = -20:35
data.frame(p=as.factor(rep(c(rep(0.2,length(x)), rep(0.5,length(x)), rep(0.8,length(x))),2)),
           expt=as.factor(c(rep("expt4",length(x)*3), rep("expt5",length(x)*3))),
           x=rep(x,6),
           beta=fit@coef["beta","Estimate"],
           mu=c(rep(fit@coef["mu0.2_4","Estimate"],length(x)),
                rep(fit@coef["mu0.5_4","Estimate"],length(x)),
                rep(fit@coef["mu0.8_4","Estimate"],length(x)),
                rep(fit@coef["mu0.2_5","Estimate"],length(x)),
                rep(fit@coef["mu0.5_5","Estimate"],length(x)),
                rep(fit@coef["mu0.8_5","Estimate"],length(x)))) %>%
  # mirrored slope for expt5, as in nLL
  mutate(beta = ifelse(expt=="expt5", -beta, beta),
         p.lie.k = p.lie.k(x, beta, mu)) %>%
  ggplot(aes(x=x, y=p.lie.k, colour=p)) +
  geom_line() +
  geom_vline(xintercept=0, linetype=2) +
  geom_vline(xintercept=10, linetype=2) +
  scale_y_continuous(limits=c(0,1)) +
  facet_wrap(~expt) +
  theme_minimal()

# Same extended logistic curves, but drawn at mu - se and mu + se to show
# an approximate +/- 1 SE band around each fitted midpoint.
x = -20:35
data.frame(p=as.factor(rep(c(rep(0.2,length(x)), rep(0.5,length(x)), rep(0.8,length(x))),2)),
           expt=as.factor(c(rep("expt4",length(x)*3), rep("expt5",length(x)*3))),
           x=rep(x,6),
           beta=fit@coef["beta","Estimate"],
           mu=c(rep(fit@coef["mu0.2_4","Estimate"],length(x)),
                rep(fit@coef["mu0.5_4","Estimate"],length(x)),
                rep(fit@coef["mu0.8_4","Estimate"],length(x)),
                rep(fit@coef["mu0.2_5","Estimate"],length(x)),
                rep(fit@coef["mu0.5_5","Estimate"],length(x)),
                rep(fit@coef["mu0.8_5","Estimate"],length(x))),
           se=c(rep(fit@coef["mu0.2_4","Std. Error"],length(x)),
                rep(fit@coef["mu0.5_4","Std. Error"],length(x)),
                rep(fit@coef["mu0.8_4","Std. Error"],length(x)),
                rep(fit@coef["mu0.2_5","Std. Error"],length(x)),
                rep(fit@coef["mu0.5_5","Std. Error"],length(x)),
                rep(fit@coef["mu0.8_5","Std. Error"],length(x)))) %>%
  # mirrored slope for expt5; upper/lower curves shift the midpoint by 1 SE
  mutate(beta = ifelse(expt=="expt5", -beta, beta),
         p.lie.k.low = p.lie.k(x, beta, mu-se),
         p.lie.k.high = p.lie.k(x, beta, mu+se)) %>%
  ggplot(aes(x=x, colour=p)) +
  geom_line(aes(y=p.lie.k.low)) +
  geom_line(aes(y=p.lie.k.high)) +
  geom_vline(xintercept=0, linetype=2) +
  geom_vline(xintercept=10, linetype=2) +
  scale_y_continuous(limits=c(0,1)) +
  facet_grid(p~expt) +
  theme_minimal()

Binomial distribution of k* for each condition

# Named fill palette: green for honest reports, red for lies.
lieColors <- c(truth = "springgreen3",
               lie = "red3")
# Expt 4: predicted report distribution per (condition, k), truth vs. lie
# mass colour-coded.
lieMLE.pred %>%
  filter(expt == "expt4") %>%
  mutate(lie = ifelse(k == kstar, "truth", "lie")) %>%
  ggplot() +
  geom_bar(aes(x = kstar, y = p.kstar.k, fill = lie), stat = "identity") +
  ggtitle("Expt 4 k* | k") +
  scale_fill_manual(values = lieColors) +
  coord_flip() +
  facet_grid(p ~ k)

# Expt 5: same predicted report distributions as the expt 4 panel above.
lieMLE.pred %>%
  filter(expt == "expt5") %>%
  mutate(lie = ifelse(k == kstar, "truth", "lie")) %>%
  ggplot() +
  geom_bar(aes(x = kstar, y = p.kstar.k, fill = lie), stat = "identity") +
  ggtitle("Expt 5 k* | k") +
  scale_fill_manual(values = lieColors) +
  coord_flip() +
  facet_grid(p ~ k)

Binomial distribution of k* for each condition, before normalizing for large spikes at k == k*

# Raw (un-renormalized) Binomial(10, inv-logit(alph)) over kstar for each
# condition; the vertical line marks the binomial mean 10 * rate.
lieMLE.pred %>%
  select(kstar, p, expt, alph) %>%
  unique() %>%
  mutate(binom.unnorm = dbinom(kstar, 10, logitToProb(alph))) %>%
  ggplot() +
  geom_bar(aes(x = kstar, y = binom.unnorm),
           stat = "identity", fill = "lightblue", colour = "gray") +
  geom_vline(aes(xintercept = logitToProb(alph) * 10)) +
  facet_grid(p ~ expt) +
  theme_bw()